max_xtp();
local_irq_disable();
- idle_task_exit();
+ idle_domain_exit();
ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
/*
 * The above is a point of no-return, the processor is
 * expected to be in the SAL loop now.
 */
void vtm_domain_out(VCPU *vcpu)
{
- if(!is_idle_task(vcpu->domain))
+ if(!is_idle_domain(vcpu->domain))
rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer);
}
{
vtime_t *vtm;
- if(!is_idle_task(vcpu->domain)) {
+ if(!is_idle_domain(vcpu->domain)) {
vtm=&(vcpu->arch.arch_vmx.vtm);
vtm_interruption_update(vcpu, vtm);
}
struct domain *d = current->domain;
struct vcpu *v = current;
// FIXME: Will this work properly if doing an RFI???
- if (!is_idle_task(d) ) { // always comes from guest
+ if (!is_idle_domain(d) ) { // always comes from guest
extern void vmx_dorfirfi(void);
struct pt_regs *user_regs = vcpu_regs(current);
if (local_softirq_pending())
struct domain *d = current->domain;
struct vcpu *v = current;
// FIXME: Will this work properly if doing an RFI???
- if (!is_idle_task(d) && user_mode(regs)) {
+ if (!is_idle_domain(d) && user_mode(regs)) {
//vcpu_poke_timer(v);
if (vcpu_deliverable_interrupts(v))
reflect_extint(regs);
/* gloss over the wraparound problem for now... we know it exists
* but it doesn't matter right now */
- if (is_idle_task(vcpu->domain)) {
+ if (is_idle_domain(vcpu->domain)) {
// printf("****** vcpu_set_next_timer called during idle!!\n");
vcpu_safe_set_itm(s);
return;
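
The wraparound caveat above is the classic free-running-counter problem. A standalone sketch (not part of this patch) of the conventional fix the comment is deferring: compare deadlines by the sign of a signed difference, which stays correct across a wrap as long as the two values are within half the counter range.

    #include <stdint.h>
    #include <stdio.h>

    /* Nonzero if deadline 'a' lies after 'b', even across a wrap, provided
     * the two values are less than half the counter range apart. */
    static int time_after(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) > 0;
    }

    int main(void)
    {
        uint64_t now = UINT64_MAX - 5;  /* counter about to wrap */
        uint64_t deadline = 10;         /* 16 ticks later, after the wrap */
        printf("%d\n", time_after(deadline, now));  /* prints 1 */
        return 0;
    }
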
ia64_set_iva(&ia64_ivt);
ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
VHPT_ENABLED);
- if (!is_idle_task(current->domain)) {
+ if (!is_idle_domain(current->domain)) {
load_region_regs(current);
vcpu_load_kernel_regs(current);
if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
char saved_command_line[COMMAND_LINE_SIZE];
-struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
+struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu };
cpumask_t cpu_present_map;
panic("Could not set up DOM0 guest OS\n");
/* PIN domain0 on CPU 0. */
- dom0->vcpu[0]->cpumap=1;
- set_bit(_VCPUF_cpu_pinned, &dom0->vcpu[0]->vcpu_flags);
+ dom0->vcpu[0]->cpu_affinity = cpumask_of_cpu(0);
#ifdef CLONE_DOMAIN0
{
vcpu_wake(dom0->vcpu[0]);
}
}
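
For context on the pinning change above: cpumask_of_cpu() yields a mask with exactly one bit set, so a single affinity field replaces the old pinned-flag-plus-bitmap pair. A toy model follows; these helpers are illustrative stand-ins, not the kernel's cpumask.h.

    #include <stdio.h>

    #define NR_CPUS 8
    typedef struct { unsigned long bits; } cpumask_t;  /* toy: one word wide */

    static cpumask_t cpumask_of_cpu(int cpu)
    {
        cpumask_t m = { 1UL << cpu };
        return m;
    }

    int main(void)
    {
        /* Pinning is now just assigning a one-bit affinity mask; no
         * separate _VCPUF_cpu_pinned flag is needed. */
        cpumask_t affinity = cpumask_of_cpu(0);
        printf("affinity: %#lx\n", affinity.bits);  /* prints 0x1 */
        return 0;
    }
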
- if (!is_idle_task(current->domain)) {
+ if (!is_idle_domain(current->domain)) {
if (vcpu_timer_expired(current)) {
vcpu_pend_timer(current);
// ensure another timer interrupt happens even if domain doesn't
} __cacheline_aligned;
static struct percpu_ctxt percpu_ctxt[NR_CPUS];
-static void continue_idle_task(struct vcpu *v)
+static void continue_idle_domain(struct vcpu *v)
{
reset_stack_and_jump(idle_loop);
}
-static void continue_nonidle_task(struct vcpu *v)
+static void continue_nonidle_domain(struct vcpu *v)
{
reset_stack_and_jump(ret_from_intr);
}
{
struct vcpu *v = current;
- ASSERT(is_idle_task(v->domain));
+ ASSERT(is_idle_domain(v->domain));
percpu_ctxt[smp_processor_id()].curr_vcpu = v;
cpu_set(smp_processor_id(), v->domain->cpumask);
- v->arch.schedule_tail = continue_idle_task;
+ v->arch.schedule_tail = continue_idle_domain;
reset_stack_and_jump(idle_loop);
}
int i;
#endif
- if ( is_idle_task(d) )
+ if ( is_idle_domain(d) )
return 0;
d->arch.ioport_caps =
return rc;
}
- v->arch.schedule_tail = continue_nonidle_task;
+ v->arch.schedule_tail = continue_nonidle_domain;
memset(d->shared_info, 0, PAGE_SIZE);
v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
- v->cpumap = CPUMAP_RUNANYWHERE;
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
struct vcpu *p = percpu_ctxt[cpu].curr_vcpu;
struct vcpu *n = current;
- if ( !is_idle_task(p->domain) )
+ if ( !is_idle_domain(p->domain) )
{
memcpy(&p->arch.guest_context.user_regs,
stack_regs,
save_segments(p);
}
- if ( !is_idle_task(n->domain) )
+ if ( !is_idle_domain(n->domain) )
{
memcpy(stack_regs,
&n->arch.guest_context.user_regs,
set_current(next);
- if ( (percpu_ctxt[cpu].curr_vcpu != next) && !is_idle_task(next->domain) )
+ if ( (percpu_ctxt[cpu].curr_vcpu != next) &&
+ !is_idle_domain(next->domain) )
{
__context_switch();
percpu_ctxt[cpu].context_not_finalised = 1;
struct vcpu idle0_vcpu = {
processor: 0,
+ cpu_affinity: CPU_MASK_CPU0,
domain: &idle0_domain
};
#endif
EXPORT_SYMBOL(mmu_cr4_features);
-struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu };
+struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu };
int acpi_disabled;
extern void percpu_traps_init(void);
- set_current(idle_task[cpu]);
+ set_current(idle_domain[cpu]);
set_processor_id(cpu);
percpu_traps_init();
if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
panic("failed 'createdomain' for CPU %d", cpu);
- v = idle_task[cpu] = idle->vcpu[0];
+ v = idle_domain[cpu] = idle->vcpu[0];
set_bit(_DOMF_idle_domain, &idle->domain_flags);
break;
}
- v->cpumap = op->u.pincpudomain.cpumap;
+ memcpy(cpus_addr(v->cpu_affinity),
+ &op->u.pincpudomain.cpumap,
+ min((int)(BITS_TO_LONGS(NR_CPUS) * sizeof(long)),
+ (int)sizeof(op->u.pincpudomain.cpumap)));
- if ( v->cpumap == CPUMAP_RUNANYWHERE )
- {
- clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
- }
- else
- {
- /* pick a new cpu from the usable map */
- int new_cpu;
- new_cpu = (int)find_first_set_bit(v->cpumap) % num_online_cpus();
- vcpu_pause(v);
- vcpu_migrate_cpu(v, new_cpu);
- set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
- vcpu_unpause(v);
- }
+ vcpu_pause(v);
+ vcpu_migrate_cpu(v, first_cpu(v->cpu_affinity));
+ vcpu_unpause(v);
put_domain(d);
}
op->u.getvcpuinfo.running = test_bit(_VCPUF_running, &v->vcpu_flags);
op->u.getvcpuinfo.cpu_time = v->cpu_time;
op->u.getvcpuinfo.cpu = v->processor;
- op->u.getvcpuinfo.cpumap = v->cpumap;
+ op->u.getvcpuinfo.cpumap = 0;
+ memcpy(&op->u.getvcpuinfo.cpumap,
+ cpus_addr(v->cpu_affinity),
+ min((int)(BITS_TO_LONGS(NR_CPUS) * sizeof(long)),
+ (int)sizeof(op->u.getvcpuinfo.cpumap)));
ret = 0;
if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
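
Both memcpy()s above clamp the transfer to the smaller of the two representations: the dom0_op cpumap is a fixed-width ABI field, while cpumask_t scales with NR_CPUS. A standalone sketch of both directions, with illustrative (non-Xen) types and sizes; the byte-level copy matches the patch's behaviour on little-endian targets.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define NR_CPUS          64
    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
    #define MIN(a, b)        ((a) < (b) ? (a) : (b))

    typedef uint32_t cpumap_t;  /* fixed-width field in the dom0_op ABI */
    typedef struct { unsigned long bits[BITS_TO_LONGS(NR_CPUS)]; } cpumask_t;

    static int first_cpu(const cpumask_t *m)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            if (m->bits[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG)))
                return cpu;
        return NR_CPUS;  /* empty mask */
    }

    int main(void)
    {
        cpumap_t user_map = 0x6;  /* CPUs 1 and 2 */
        cpumask_t affinity = { {0} };

        /* Copy-in (pincpudomain): clamp to the smaller representation. */
        memcpy(affinity.bits, &user_map,
               MIN(BITS_TO_LONGS(NR_CPUS) * sizeof(long), sizeof(user_map)));
        printf("migrate to cpu %d\n", first_cpu(&affinity));  /* cpu 1 */

        /* Copy-out (getvcpuinfo): zero-fill first, clamp again; any CPU
         * whose bit lies beyond the cpumap's width is silently dropped. */
        cpumap_t report = 0;
        memcpy(&report, affinity.bits,
               MIN(BITS_TO_LONGS(NR_CPUS) * sizeof(long), sizeof(report)));
        printf("reported cpumap %#x\n", report);  /* 0x6 */
        return 0;
    }
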
else
set_bit(_DOMF_ctrl_pause, &d->domain_flags);
- if ( !is_idle_task(d) &&
+ if ( !is_idle_domain(d) &&
((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
goto fail1;
(arch_do_createdomain(v) != 0) )
goto fail3;
- if ( !is_idle_task(d) )
+ if ( !is_idle_domain(d) )
{
write_lock(&domlist_lock);
pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
einf->vcpu = v;
- if ( is_idle_task(v->domain) )
+ if ( is_idle_domain(v->domain) )
{
einf->avt = einf->evt = ~0U;
BUG_ON(__task_on_runqueue(v));
((einf->evt - curr_evt) / BVT_INFO(curr->domain)->mcu_advance) +
ctx_allow;
- if ( is_idle_task(curr->domain) || (einf->evt <= curr_evt) )
+ if ( is_idle_domain(curr->domain) || (einf->evt <= curr_evt) )
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
else if ( schedule_data[cpu].s_timer.expires > r_time )
set_ac_timer(&schedule_data[cpu].s_timer, r_time);
ASSERT(prev_einf != NULL);
ASSERT(__task_on_runqueue(prev));
- if ( likely(!is_idle_task(prev->domain)) )
+ if ( likely(!is_idle_domain(prev->domain)) )
{
prev_einf->avt = calc_avt(prev, now);
prev_einf->evt = calc_evt(prev, prev_einf->avt);
}
/* work out time for next run through scheduler */
- if ( is_idle_task(next->domain) )
+ if ( is_idle_domain(next->domain) )
{
r_time = ctx_allow;
goto sched_done;
}
- if ( (next_prime == NULL) || is_idle_task(next_prime->domain) )
+ if ( (next_prime == NULL) || is_idle_domain(next_prime->domain) )
{
/* We have only one runnable task besides the idle task. */
r_time = 10 * ctx_allow; /* RN: random constant */
INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
- if (!is_idle_task(d->domain)) {
+ if (!is_idle_domain(d->domain)) {
extraq_check(d);
} else {
EDOM_INFO(d)->deadl_abs = 0;
struct task_slice ret;
/* idle tasks don't need any of the following stuff */
- if (is_idle_task(current->domain))
+ if (is_idle_domain(current->domain))
goto check_waitq;
/* create local state of the status of the domain, in order to avoid
static void sedf_sleep(struct vcpu *d) {
PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id);
- if (is_idle_task(d->domain))
+ if (is_idle_domain(d->domain))
return;
EDOM_INFO(d)->status |= SEDF_ASLEEP;
#define DOMAIN_IDLE 4
static inline int get_run_type(struct vcpu* d) {
struct sedf_vcpu_info* inf = EDOM_INFO(d);
- if (is_idle_task(d->domain))
+ if (is_idle_domain(d->domain))
return DOMAIN_IDLE;
if (inf->status & EXTRA_RUN_PEN)
return DOMAIN_EXTRA_PEN;
PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id,
d->vcpu_id);
- if (unlikely(is_idle_task(d->domain)))
+ if (unlikely(is_idle_domain(d->domain)))
return;
if ( unlikely(__task_on_queue(d)) ) {
v->vcpu_id = vcpu_id;
v->processor = cpu_id;
atomic_set(&v->pausecnt, 0);
- v->cpumap = CPUMAP_RUNANYWHERE;
+
+ v->cpu_affinity = is_idle_domain(d) ?
+ cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;
d->vcpu[vcpu_id] = v;
/* Initialise the per-domain timer. */
init_ac_timer(&v->timer, dom_timer_fn, v, v->processor);
- if ( is_idle_task(d) )
+ if ( is_idle_domain(d) )
{
schedule_data[v->processor].curr = v;
schedule_data[v->processor].idle = v;
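
The default chosen in alloc_vcpu above encodes a policy worth spelling out: each physical CPU has its own idle vcpu that must never migrate, so idle vcpus start pinned to their CPU, while guest vcpus start with CPU_MASK_ALL and may be pinned later via pincpudomain. A toy sketch of that rule (types and helpers are illustrative only):

    #include <stdio.h>

    #define NR_CPUS 4
    typedef struct { unsigned long bits; } cpumask_t;  /* toy: one word */
    #define CPU_MASK_ALL      ((cpumask_t){ (1UL << NR_CPUS) - 1 })
    #define cpumask_of_cpu(c) ((cpumask_t){ 1UL << (c) })

    struct toy_vcpu { int is_idle; int processor; cpumask_t cpu_affinity; };

    static void set_default_affinity(struct toy_vcpu *v)
    {
        /* Idle vcpus are per-CPU and must never migrate; guests may run
         * anywhere until explicitly pinned. */
        v->cpu_affinity = v->is_idle ? cpumask_of_cpu(v->processor)
                                     : CPU_MASK_ALL;
    }

    int main(void)
    {
        struct toy_vcpu idle  = { 1, 2, { 0 } };
        struct toy_vcpu guest = { 0, 2, { 0 } };
        set_default_affinity(&idle);
        set_default_affinity(&guest);
        printf("idle %#lx, guest %#lx\n",
               idle.cpu_affinity.bits, guest.cpu_affinity.bits); /* 0x4, 0xf */
        return 0;
    }
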
prev->wokenup = NOW();
#if defined(WAKE_HISTO)
- if ( !is_idle_task(next->domain) && next->wokenup )
+ if ( !is_idle_domain(next->domain) && next->wokenup )
{
ulong diff = (ulong)(now - next->wokenup);
diff /= (ulong)MILLISECS(1);
next->wokenup = (s_time_t)0;
#elif defined(BLOCKTIME_HISTO)
prev->lastdeschd = now;
- if ( !is_idle_task(next->domain) )
+ if ( !is_idle_domain(next->domain) )
{
ulong diff = (ulong)((now - next->lastdeschd) / MILLISECS(10));
if (diff <= BUCKETS-2) schedule_data[cpu].hist[diff]++;
prev->sleep_tick = schedule_data[cpu].tick;
/* Ensure that the domain has an up-to-date time base. */
- if ( !is_idle_task(next->domain) )
+ if ( !is_idle_domain(next->domain) )
{
update_dom_time(next);
if ( next->sleep_tick != schedule_data[cpu].tick )
int idle_cpu(int cpu)
{
struct vcpu *p = schedule_data[cpu].curr;
- return p == idle_task[cpu];
+ return p == idle_domain[cpu];
}
schedule_data[cpu].tick++;
- if ( !is_idle_task(v->domain) )
+ if ( !is_idle_domain(v->domain) )
{
update_dom_time(v);
send_guest_virq(v, VIRQ_TIMER);
init_ac_timer(&t_timer[i], t_timer_fn, NULL, i);
}
- schedule_data[0].curr = idle_task[0];
- schedule_data[0].idle = idle_task[0];
+ schedule_data[0].curr = idle_domain[0];
+ schedule_data[0].idle = idle_domain[0];
for ( i = 0; schedulers[i] != NULL; i++ )
{
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
- rc = SCHED_OP(alloc_task, idle_task[0]);
+ rc = SCHED_OP(alloc_task, idle_domain[0]);
BUG_ON(rc < 0);
- sched_add_domain(idle_task[0]);
+ sched_add_domain(idle_domain[0]);
}
/*
int evtchn_init(struct domain *d);
void evtchn_destroy(struct domain *d);
-#define CPUMAP_RUNANYWHERE 0xFFFFFFFF
-
struct vcpu
{
int vcpu_id;
atomic_t pausecnt;
- cpumap_t cpumap; /* which cpus this domain can run on */
+ cpumask_t cpu_affinity;
struct arch_vcpu arch;
};
extern struct domain idle0_domain;
extern struct vcpu idle0_vcpu;
-extern struct vcpu *idle_task[NR_CPUS];
+extern struct vcpu *idle_domain[NR_CPUS];
#define IDLE_DOMAIN_ID (0x7FFFU)
-#define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
+#define is_idle_domain(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
struct vcpu *alloc_vcpu(
struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
/* Currently running on a CPU? */
#define _VCPUF_running 3
#define VCPUF_running (1UL<<_VCPUF_running)
- /* Disables auto-migration between CPUs. */
-#define _VCPUF_cpu_pinned 4
-#define VCPUF_cpu_pinned (1UL<<_VCPUF_cpu_pinned)
/* Domain migrated between CPUs. */
-#define _VCPUF_cpu_migrated 5
+#define _VCPUF_cpu_migrated 4
#define VCPUF_cpu_migrated (1UL<<_VCPUF_cpu_migrated)
/* Initialization completed. */
-#define _VCPUF_initialised 6
+#define _VCPUF_initialised 5
#define VCPUF_initialised (1UL<<_VCPUF_initialised)
/* VCPU is not-runnable */
-#define _VCPUF_down 7
+#define _VCPUF_down 6
#define VCPUF_down (1UL<<_VCPUF_down)
/*